Now _raw_spin_lock() checks whether interrupts are masked.
If they are, it panics.
lock_ipi_calllock() violates the assumption.
This patch makes lock_ipi_calllock() use spin_lock_irqsave()
instead of spin_lock_irq().
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
extern void cpu_halt (void);
+#ifdef XEN
+/* workaround for spinlock irq check. */
+void
+lock_ipi_calllock(unsigned long *flags)
+{
+ spin_lock_irqsave(&call_lock, *flags);
+}
+
+void
+unlock_ipi_calllock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&call_lock, flags);
+}
+#else
void
lock_ipi_calllock(void)
{
{
spin_unlock_irq(&call_lock);
}
+#endif
static void
stop_this_cpu (void)
static void __devinit
smp_callin (void)
{
+#ifdef XEN
+ /* workaround for spinlock irq assert. */
+ unsigned long flags;
+#endif
int cpuid, phys_id;
extern void ia64_init_itm(void);
fix_b0_for_bsp();
+#ifdef XEN
+ lock_ipi_calllock(&flags);
+#else
lock_ipi_calllock();
+#endif
cpu_set(cpuid, cpu_online_map);
+#ifdef XEN
+ unlock_ipi_calllock(flags);
+#else
unlock_ipi_calllock();
+#endif
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
smp_setup_percpu_timer();
extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
int retry, int wait);
extern void smp_send_reschedule (int cpu);
+#ifdef XEN
+extern void lock_ipi_calllock(unsigned long *flags);
+extern void unlock_ipi_calllock(unsigned long flags);
+#else
extern void lock_ipi_calllock(void);
extern void unlock_ipi_calllock(void);
+#endif
extern void identify_siblings (struct cpuinfo_ia64 *);
#else